You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
  + create
 <= read (data resources)

Terraform will perform the following actions:

  # data.kubernetes_secret.sa_credentials will be read during apply
  # (config refers to values not yet known)
 <= data "kubernetes_secret" "sa_credentials" {
+data=(sensitive value)
+id=(known after apply)
+type=(known after apply)
+metadata {
+generation=(known after apply)
+name=(known after apply)
+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# hcloud_network.k3s will be created+resource"hcloud_network""k3s" {
+id=(known after apply)
+ip_range="10.0.0.0/8"+name="k3s-network"
}
# hcloud_network_subnet.k3s_nodes will be created+resource"hcloud_network_subnet""k3s_nodes" {
+gateway=(known after apply)
+id=(known after apply)
+ip_range="10.254.1.0/24"+network_id=(known after apply)
+network_zone="eu-central"+type="server"
}
# hcloud_server.agents[0] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "gpu"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[1] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[2] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[0] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[1] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[2] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server_network.agents_network[0] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.4"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[1] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.5"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[2] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.6"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[0] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.1"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[1] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.2"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[2] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.3"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_ssh_key.default will be created+resource"hcloud_ssh_key""default" {
+fingerprint=(known after apply)
+id=(known after apply)
+name="K3S terraform module - Provisionning SSH key"
}
# kubernetes_cluster_role_binding.boostrap will be created+resource"kubernetes_cluster_role_binding""boostrap" {
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
+role_ref {
+api_group="rbac.authorization.k8s.io"+kind="ClusterRole"+name="admin"
}
+subject {
+api_group=(known after apply)
+kind="ServiceAccount"+name="bootstrap"+namespace="default"
}
}
# kubernetes_service_account.bootstrap will be created+resource"kubernetes_service_account""bootstrap" {
+automount_service_account_token=true+default_secret_name=(known after apply)
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# module.k3s.null_resource.agents_drain["k3s-agent-0_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-1_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-2_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_install["k3s-agent-0_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-1_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-2_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_label["k3s-agent-0_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-1_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-2_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_taint["k3s-agent-0_node|dedicated"] will be created+resource"null_resource""agents_taint" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[0] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[1] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[2] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[3] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[4] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[5] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.kubernetes_ready will be created+resource"null_resource""kubernetes_ready" {
+id=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-0"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-1"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-2"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_install["k3s-control-plane-0"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-1"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-2"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.random_password.k3s_cluster_secret will be created+resource"random_password""k3s_cluster_secret" {
+id=(known after apply)
+length=48+lower=true+min_lower=0+min_numeric=0+min_special=0+min_upper=0+number=true+result=(sensitive value)
+special=false+upper=true
}
# module.k3s.tls_cert_request.master_user[0] will be created+resource"tls_cert_request""master_user" {
+cert_request_pem=(known after apply)
+id=(known after apply)
+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+subject {
+common_name="master-user"+organization="system:masters"
}
}
# module.k3s.tls_locally_signed_cert.master_user[0] will be created+resource"tls_locally_signed_cert""master_user" {
+allowed_uses=[
+"key_encipherment",
+"digital_signature",
+"client_auth",
]
+ca_cert_pem=(known after apply)
+ca_key_algorithm="ECDSA"+ca_private_key_pem=(sensitive value)
+cert_pem=(known after apply)
+cert_request_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
}
# module.k3s.tls_private_key.kubernetes_ca[0] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[1] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[2] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.master_user[0] will be created+resource"tls_private_key""master_user" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["0"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-client-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["1"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-server-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["2"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-request-header-key-ca"
}
}
Plan: 50 to add, 0 to change, 0 to destroy.

Changes to Outputs:
  + bootstrap_sa = (sensitive value)
+summary={
+ agents = [
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="gpu"
}
+ name ="k3s-agent-0"+ taints = {
+ dedicated ="gpu:NoSchedule"
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-1"+ taints = {
+ dedicated =null
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-2"+ taints = {
+ dedicated =null
}
},
]
+ servers = [
+ {
+ annotations = {
+ server_id =0
}
+ labels = []
+ name ="k3s-control-plane-0"+ taints = []
},
+ {
+ annotations = {
+ server_id =1
}
+ labels = []
+ name ="k3s-control-plane-1"+ taints = []
},
+ {
+ annotations = {
+ server_id =2
}
+ labels = []
+ name ="k3s-control-plane-2"+ taints = []
},
]
+ version ="v1.21.2+k3s1"
}
─────────────────────────────────────────────────────────────────────────────
Note: You didn't use the -out option to save this plan, so Terraform can't
guarantee to take exactly these actions if you run "terraform apply" now.
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
  + create
 <= read (data resources)

Terraform will perform the following actions:

  # data.kubernetes_secret.sa_credentials will be read during apply
  # (config refers to values not yet known)
 <= data "kubernetes_secret" "sa_credentials" {
+data=(sensitive value)
+id=(known after apply)
+type=(known after apply)
+metadata {
+generation=(known after apply)
+name=(known after apply)
+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# hcloud_network.k3s will be created+resource"hcloud_network""k3s" {
+id=(known after apply)
+ip_range="10.0.0.0/8"+name="k3s-network"
}
# hcloud_network_subnet.k3s_nodes will be created+resource"hcloud_network_subnet""k3s_nodes" {
+gateway=(known after apply)
+id=(known after apply)
+ip_range="10.254.1.0/24"+network_id=(known after apply)
+network_zone="eu-central"+type="server"
}
# hcloud_server.agents[0] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "gpu"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[1] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[2] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[0] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[1] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[2] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server_network.agents_network[0] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.4"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[1] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.5"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[2] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.6"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[0] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.1"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[1] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.2"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[2] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.3"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_ssh_key.default will be created+resource"hcloud_ssh_key""default" {
+fingerprint=(known after apply)
+id=(known after apply)
+name="K3S terraform module - Provisionning SSH key"
}
# kubernetes_cluster_role_binding.boostrap will be created+resource"kubernetes_cluster_role_binding""boostrap" {
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
+role_ref {
+api_group="rbac.authorization.k8s.io"+kind="ClusterRole"+name="admin"
}
+subject {
+api_group=(known after apply)
+kind="ServiceAccount"+name="bootstrap"+namespace="default"
}
}
# kubernetes_service_account.bootstrap will be created+resource"kubernetes_service_account""bootstrap" {
+automount_service_account_token=true+default_secret_name=(known after apply)
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# module.k3s.null_resource.agents_drain["k3s-agent-0_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-1_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-2_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_install["k3s-agent-0_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-1_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-2_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_label["k3s-agent-0_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-1_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-2_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_taint["k3s-agent-0_node|dedicated"] will be created+resource"null_resource""agents_taint" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[0] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[1] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[2] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[3] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[4] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[5] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.kubernetes_ready will be created+resource"null_resource""kubernetes_ready" {
+id=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-0"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-1"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-2"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_install["k3s-control-plane-0"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-1"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-2"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.random_password.k3s_cluster_secret will be created+resource"random_password""k3s_cluster_secret" {
+id=(known after apply)
+length=48+lower=true+min_lower=0+min_numeric=0+min_special=0+min_upper=0+number=true+result=(sensitive value)
+special=false+upper=true
}
# module.k3s.tls_cert_request.master_user[0] will be created+resource"tls_cert_request""master_user" {
+cert_request_pem=(known after apply)
+id=(known after apply)
+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+subject {
+common_name="master-user"+organization="system:masters"
}
}
# module.k3s.tls_locally_signed_cert.master_user[0] will be created+resource"tls_locally_signed_cert""master_user" {
+allowed_uses=[
+"key_encipherment",
+"digital_signature",
+"client_auth",
]
+ca_cert_pem=(known after apply)
+ca_key_algorithm="ECDSA"+ca_private_key_pem=(sensitive value)
+cert_pem=(known after apply)
+cert_request_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
}
# module.k3s.tls_private_key.kubernetes_ca[0] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[1] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[2] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.master_user[0] will be created+resource"tls_private_key""master_user" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["0"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-client-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["1"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-server-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["2"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-request-header-key-ca"
}
}
Plan:50 to add, 0 to change, 0 to destroy.
Changes to Outputs:+bootstrap_sa=(sensitive value)
+summary={
+ agents = [
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="gpu"
}
+ name ="k3s-agent-0"+ taints = {
+ dedicated ="gpu:NoSchedule"
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-1"+ taints = {
+ dedicated =null
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-2"+ taints = {
+ dedicated =null
}
},
]
+ servers = [
+ {
+ annotations = {
+ server_id =0
}
+ labels = []
+ name ="k3s-control-plane-0"+ taints = []
},
+ {
+ annotations = {
+ server_id =1
}
+ labels = []
+ name ="k3s-control-plane-1"+ taints = []
},
+ {
+ annotations = {
+ server_id =2
}
+ labels = []
+ name ="k3s-control-plane-2"+ taints = []
},
]
+ version ="v1.21.2+k3s1"
}
─────────────────────────────────────────────────────────────────────────────
Note: You didn't use the -out option to save this plan, so Terraform can't
guarantee to take exactly these actions if you run "terraform apply" now.
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:+ create
<= read (data resources)
Terraform will perform the following actions:# data.kubernetes_secret.sa_credentials will be read during apply# (config refers to values not yet known)<=data"kubernetes_secret""sa_credentials" {
+data=(sensitive value)
+id=(known after apply)
+type=(known after apply)
+metadata {
+generation=(known after apply)
+name=(known after apply)
+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# hcloud_network.k3s will be created+resource"hcloud_network""k3s" {
+id=(known after apply)
+ip_range="10.0.0.0/8"+name="k3s-network"
}
# hcloud_network_subnet.k3s_nodes will be created+resource"hcloud_network_subnet""k3s_nodes" {
+gateway=(known after apply)
+id=(known after apply)
+ip_range="10.254.1.0/24"+network_id=(known after apply)
+network_zone="eu-central"+type="server"
}
# hcloud_server.agents[0] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "gpu"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[1] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[2] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[0] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[1] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[2] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server_network.agents_network[0] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.4"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[1] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.5"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[2] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.6"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[0] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.1"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[1] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.2"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[2] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.3"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_ssh_key.default will be created+resource"hcloud_ssh_key""default" {
+fingerprint=(known after apply)
+id=(known after apply)
+name="K3S terraform module - Provisionning SSH key"
}
# kubernetes_cluster_role_binding.boostrap will be created+resource"kubernetes_cluster_role_binding""boostrap" {
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
+role_ref {
+api_group="rbac.authorization.k8s.io"+kind="ClusterRole"+name="admin"
}
+subject {
+api_group=(known after apply)
+kind="ServiceAccount"+name="bootstrap"+namespace="default"
}
}
# kubernetes_service_account.bootstrap will be created+resource"kubernetes_service_account""bootstrap" {
+automount_service_account_token=true+default_secret_name=(known after apply)
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# module.k3s.null_resource.agents_drain["k3s-agent-0_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-1_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-2_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_install["k3s-agent-0_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-1_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-2_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_label["k3s-agent-0_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-1_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-2_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_taint["k3s-agent-0_node|dedicated"] will be created+resource"null_resource""agents_taint" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[0] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[1] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[2] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[3] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[4] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[5] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.kubernetes_ready will be created+resource"null_resource""kubernetes_ready" {
+id=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-0"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-1"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-2"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_install["k3s-control-plane-0"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-1"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-2"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.random_password.k3s_cluster_secret will be created+resource"random_password""k3s_cluster_secret" {
+id=(known after apply)
+length=48+lower=true+min_lower=0+min_numeric=0+min_special=0+min_upper=0+number=true+result=(sensitive value)
+special=false+upper=true
}
# module.k3s.tls_cert_request.master_user[0] will be created+resource"tls_cert_request""master_user" {
+cert_request_pem=(known after apply)
+id=(known after apply)
+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+subject {
+common_name="master-user"+organization="system:masters"
}
}
# module.k3s.tls_locally_signed_cert.master_user[0] will be created+resource"tls_locally_signed_cert""master_user" {
+allowed_uses=[
+"key_encipherment",
+"digital_signature",
+"client_auth",
]
+ca_cert_pem=(known after apply)
+ca_key_algorithm="ECDSA"+ca_private_key_pem=(sensitive value)
+cert_pem=(known after apply)
+cert_request_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
}
# module.k3s.tls_private_key.kubernetes_ca[0] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[1] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[2] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.master_user[0] will be created+resource"tls_private_key""master_user" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["0"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-client-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["1"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-server-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["2"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-request-header-key-ca"
}
}
Plan:50 to add, 0 to change, 0 to destroy.
Changes to Outputs:+bootstrap_sa=(sensitive value)
+summary={
+ agents = [
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="gpu"
}
+ name ="k3s-agent-0"+ taints = {
+ dedicated ="gpu:NoSchedule"
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-1"+ taints = {
+ dedicated =null
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-2"+ taints = {
+ dedicated =null
}
},
]
+ servers = [
+ {
+ annotations = {
+ server_id =0
}
+ labels = []
+ name ="k3s-control-plane-0"+ taints = []
},
+ {
+ annotations = {
+ server_id =1
}
+ labels = []
+ name ="k3s-control-plane-1"+ taints = []
},
+ {
+ annotations = {
+ server_id =2
}
+ labels = []
+ name ="k3s-control-plane-2"+ taints = []
},
]
+ version ="v1.21.2+k3s1"
}
─────────────────────────────────────────────────────────────────────────────
Note: You didn't use the -out option to save this plan, so Terraform can't
guarantee to take exactly these actions if you run "terraform apply" now.
variables.tf
--- old/variables.tf+++ new/variables.tf@@ -15,7 +15,7 @@
default = "cluster.local"
validation {
- condition = false+ condition = false
error_message = "Variable `name` is deprecated, use `cluster_domain` instead. It will be removed at the next major release."
}
}
The Terraform configuration must be valid before initialization so that
Terraform can determine which modules and providers need to be installed.
Error: Invalid variable validation condition
on ../../variables.tf line 18, in variable "name":
18: condition = false
The condition for variable "name" must refer to var.name in order to test
incoming values.
The Terraform configuration must be valid before initialization so that
Terraform can determine which modules and providers need to be installed.
Error: Invalid variable validation condition
on ../../variables.tf line 18, in variable "name":
18: condition = false
The condition for variable "name" must refer to var.name in order to test
incoming values.
Error: Invalid value for variable
on k3s.tf line 6, in module "k3s":
6: name = "cluster.local"
Variable `name` is deprecated, use `cluster_domain` instead. It will be
removed at the next major release.
This was checked by the validation rule at ../../variables.tf:17,3-13.
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:+ create
<= read (data resources)
Terraform will perform the following actions:# data.kubernetes_secret.sa_credentials will be read during apply# (config refers to values not yet known)<=data"kubernetes_secret""sa_credentials" {
+data=(sensitive value)
+id=(known after apply)
+type=(known after apply)
+metadata {
+generation=(known after apply)
+name=(known after apply)
+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# hcloud_network.k3s will be created+resource"hcloud_network""k3s" {
+id=(known after apply)
+ip_range="10.0.0.0/8"+name="k3s-network"
}
# hcloud_network_subnet.k3s_nodes will be created+resource"hcloud_network_subnet""k3s_nodes" {
+gateway=(known after apply)
+id=(known after apply)
+ip_range="10.254.1.0/24"+network_id=(known after apply)
+network_zone="eu-central"+type="server"
}
# hcloud_server.agents[0] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "gpu"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[1] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[2] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[0] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[1] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[2] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server_network.agents_network[0] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.4"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[1] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.5"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[2] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.6"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[0] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.1"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[1] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.2"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[2] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.3"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_ssh_key.default will be created+resource"hcloud_ssh_key""default" {
+fingerprint=(known after apply)
+id=(known after apply)
+name="K3S terraform module - Provisionning SSH key"
}
# kubernetes_cluster_role_binding.boostrap will be created+resource"kubernetes_cluster_role_binding""boostrap" {
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
+role_ref {
+api_group="rbac.authorization.k8s.io"+kind="ClusterRole"+name="admin"
}
+subject {
+api_group=(known after apply)
+kind="ServiceAccount"+name="bootstrap"+namespace="default"
}
}
# kubernetes_service_account.bootstrap will be created+resource"kubernetes_service_account""bootstrap" {
+automount_service_account_token=true+default_secret_name=(known after apply)
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# module.k3s.null_resource.agents_drain["k3s-agent-0_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-1_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-2_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_install["k3s-agent-0_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-1_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-2_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_label["k3s-agent-0_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-1_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-2_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_taint["k3s-agent-0_node|dedicated"] will be created+resource"null_resource""agents_taint" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[0] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[1] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[2] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[3] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[4] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[5] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.kubernetes_ready will be created+resource"null_resource""kubernetes_ready" {
+id=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-0"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-1"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-2"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_install["k3s-control-plane-0"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-1"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-2"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.random_password.k3s_cluster_secret will be created+resource"random_password""k3s_cluster_secret" {
+id=(known after apply)
+length=48+lower=true+min_lower=0+min_numeric=0+min_special=0+min_upper=0+number=true+result=(sensitive value)
+special=false+upper=true
}
# module.k3s.tls_cert_request.master_user[0] will be created+resource"tls_cert_request""master_user" {
+cert_request_pem=(known after apply)
+id=(known after apply)
+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+subject {
+common_name="master-user"+organization="system:masters"
}
}
# module.k3s.tls_locally_signed_cert.master_user[0] will be created+resource"tls_locally_signed_cert""master_user" {
+allowed_uses=[
+"key_encipherment",
+"digital_signature",
+"client_auth",
]
+ca_cert_pem=(known after apply)
+ca_key_algorithm="ECDSA"+ca_private_key_pem=(sensitive value)
+cert_pem=(known after apply)
+cert_request_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
}
# module.k3s.tls_private_key.kubernetes_ca[0] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[1] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[2] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.master_user[0] will be created+resource"tls_private_key""master_user" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["0"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-client-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["1"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-server-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["2"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-request-header-key-ca"
}
}
Plan:50 to add, 0 to change, 0 to destroy.
Changes to Outputs:+bootstrap_sa=(sensitive value)
+summary={
+ agents = [
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="gpu"
}
+ name ="k3s-agent-0"+ taints = {
+ dedicated ="gpu:NoSchedule"
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-1"+ taints = {
+ dedicated =null
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-2"+ taints = {
+ dedicated =null
}
},
]
+ servers = [
+ {
+ annotations = {
+ server_id =0
}
+ labels = []
+ name ="k3s-control-plane-0"+ taints = []
},
+ {
+ annotations = {
+ server_id =1
}
+ labels = []
+ name ="k3s-control-plane-1"+ taints = []
},
+ {
+ annotations = {
+ server_id =2
}
+ labels = []
+ name ="k3s-control-plane-2"+ taints = []
},
]
+ version ="v1.21.2+k3s1"
}
─────────────────────────────────────────────────────────────────────────────
Note: You didn't use the -out option to save this plan, so Terraform can't
guarantee to take exactly these actions if you run "terraform apply" now.
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:+ create
<= read (data resources)
Terraform will perform the following actions:# data.kubernetes_secret.sa_credentials will be read during apply# (config refers to values not yet known)<=data"kubernetes_secret""sa_credentials" {
+data=(sensitive value)
+id=(known after apply)
+type=(known after apply)
+metadata {
+generation=(known after apply)
+name=(known after apply)
+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# hcloud_network.k3s will be created+resource"hcloud_network""k3s" {
+id=(known after apply)
+ip_range="10.0.0.0/8"+name="k3s-network"
}
# hcloud_network_subnet.k3s_nodes will be created+resource"hcloud_network_subnet""k3s_nodes" {
+gateway=(known after apply)
+id=(known after apply)
+ip_range="10.254.1.0/24"+network_id=(known after apply)
+network_zone="eu-central"+type="server"
}
# hcloud_server.agents[0] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "gpu"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[1] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[2] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[0] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[1] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[2] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server_network.agents_network[0] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.4"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[1] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.5"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[2] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.6"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[0] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.1"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[1] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.2"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[2] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.3"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_ssh_key.default will be created+resource"hcloud_ssh_key""default" {
+fingerprint=(known after apply)
+id=(known after apply)
+name="K3S terraform module - Provisionning SSH key"
}
# kubernetes_cluster_role_binding.boostrap will be created+resource"kubernetes_cluster_role_binding""boostrap" {
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
+role_ref {
+api_group="rbac.authorization.k8s.io"+kind="ClusterRole"+name="admin"
}
+subject {
+api_group=(known after apply)
+kind="ServiceAccount"+name="bootstrap"+namespace="default"
}
}
# kubernetes_service_account.bootstrap will be created+resource"kubernetes_service_account""bootstrap" {
+automount_service_account_token=true+default_secret_name=(known after apply)
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# module.k3s.null_resource.agents_drain["k3s-agent-0_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-1_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-2_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_install["k3s-agent-0_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-1_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-2_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_label["k3s-agent-0_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-1_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-2_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_taint["k3s-agent-0_node|dedicated"] will be created+resource"null_resource""agents_taint" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[0] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[1] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[2] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[3] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[4] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[5] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.kubernetes_ready will be created+resource"null_resource""kubernetes_ready" {
+id=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-0"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-1"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-2"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_install["k3s-control-plane-0"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-1"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.servers_install["k3s-control-plane-2"] will be created+resource"null_resource""servers_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.random_password.k3s_cluster_secret will be created+resource"random_password""k3s_cluster_secret" {
+id=(known after apply)
+length=48+lower=true+min_lower=0+min_numeric=0+min_special=0+min_upper=0+number=true+result=(sensitive value)
+special=false+upper=true
}
# module.k3s.tls_cert_request.master_user[0] will be created+resource"tls_cert_request""master_user" {
+cert_request_pem=(known after apply)
+id=(known after apply)
+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+subject {
+common_name="master-user"+organization="system:masters"
}
}
# module.k3s.tls_locally_signed_cert.master_user[0] will be created+resource"tls_locally_signed_cert""master_user" {
+allowed_uses=[
+"key_encipherment",
+"digital_signature",
+"client_auth",
]
+ca_cert_pem=(known after apply)
+ca_key_algorithm="ECDSA"+ca_private_key_pem=(sensitive value)
+cert_pem=(known after apply)
+cert_request_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
}
# module.k3s.tls_private_key.kubernetes_ca[0] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[1] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.kubernetes_ca[2] will be created+resource"tls_private_key""kubernetes_ca" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_private_key.master_user[0] will be created+resource"tls_private_key""master_user" {
+algorithm="ECDSA"+ecdsa_curve="P384"+id=(known after apply)
+private_key_pem=(sensitive value)
+public_key_fingerprint_md5=(known after apply)
+public_key_openssh=(known after apply)
+public_key_pem=(known after apply)
+rsa_bits=2048
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["0"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-client-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["1"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-server-ca"
}
}
# module.k3s.tls_self_signed_cert.kubernetes_ca_certs["2"] will be created+resource"tls_self_signed_cert""kubernetes_ca_certs" {
+allowed_uses=[
+"critical",
+"digitalSignature",
+"keyEncipherment",
+"keyCertSign",
]
+cert_pem=(known after apply)
+early_renewal_hours=0+id=(known after apply)
+is_ca_certificate=true+key_algorithm="ECDSA"+private_key_pem=(sensitive value)
+ready_for_renewal=true+validity_end_time=(known after apply)
+validity_period_hours=876600+validity_start_time=(known after apply)
+subject {
+common_name="kubernetes-request-header-key-ca"
}
}
Plan:50 to add, 0 to change, 0 to destroy.
Changes to Outputs:+bootstrap_sa=(sensitive value)
+summary={
+ agents = [
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="gpu"
}
+ name ="k3s-agent-0"+ taints = {
+ dedicated ="gpu:NoSchedule"
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-1"+ taints = {
+ dedicated =null
}
},
+ {
+ annotations = []
+ labels = {
+ node.kubernetes.io/pool ="general"
}
+ name ="k3s-agent-2"+ taints = {
+ dedicated =null
}
},
]
+ servers = [
+ {
+ annotations = {
+ server_id =0
}
+ labels = []
+ name ="k3s-control-plane-0"+ taints = []
},
+ {
+ annotations = {
+ server_id =1
}
+ labels = []
+ name ="k3s-control-plane-1"+ taints = []
},
+ {
+ annotations = {
+ server_id =2
}
+ labels = []
+ name ="k3s-control-plane-2"+ taints = []
},
]
+ version ="v1.21.2+k3s1"
}
─────────────────────────────────────────────────────────────────────────────
Note: You didn't use the -out option to save this plan, so Terraform can't
guarantee to take exactly these actions if you run "terraform apply" now.
The use of `cluster_domain` is clearer than just `name`, which could be interpreted as something
else.
BREAKING CHANGE: Deprecation of `name` variable
fix #53
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:+ create
<= read (data resources)
Terraform will perform the following actions:# data.kubernetes_secret.sa_credentials will be read during apply# (config refers to values not yet known)<=data"kubernetes_secret""sa_credentials" {
+data=(sensitive value)
+id=(known after apply)
+type=(known after apply)
+metadata {
+generation=(known after apply)
+name=(known after apply)
+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# hcloud_network.k3s will be created+resource"hcloud_network""k3s" {
+id=(known after apply)
+ip_range="10.0.0.0/8"+name="k3s-network"
}
# hcloud_network_subnet.k3s_nodes will be created+resource"hcloud_network_subnet""k3s_nodes" {
+gateway=(known after apply)
+id=(known after apply)
+ip_range="10.254.1.0/24"+network_id=(known after apply)
+network_zone="eu-central"+type="server"
}
# hcloud_server.agents[0] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "gpu"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[1] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.agents[2] will be created+resource"hcloud_server""agents" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "agent"+"nodepool" = "general"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-agent-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[0] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-0"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[1] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-1"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server.control_planes[2] will be created+resource"hcloud_server""control_planes" {
+backup_window=(known after apply)
+backups=false+datacenter=(known after apply)
+id=(known after apply)
+image="ubuntu-20.04"+ipv4_address=(known after apply)
+ipv6_address=(known after apply)
+ipv6_network=(known after apply)
+keep_disk=false+labels={
+"engine" = "k3s"+"node_type" = "control-plane"+"provisioner" = "terraform"
}
+location=(known after apply)
+name="k3s-control-plane-2"+server_type="cx11-ceph"+ssh_keys=(known after apply)
+status=(known after apply)
}
# hcloud_server_network.agents_network[0] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.4"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[1] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.5"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.agents_network[2] will be created+resource"hcloud_server_network""agents_network" {
+id=(known after apply)
+ip="10.254.1.6"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[0] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.1"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[1] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.2"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_server_network.control_planes[2] will be created+resource"hcloud_server_network""control_planes" {
+id=(known after apply)
+ip="10.254.1.3"+mac_address=(known after apply)
+server_id=(known after apply)
+subnet_id=(known after apply)
}
# hcloud_ssh_key.default will be created+resource"hcloud_ssh_key""default" {
+fingerprint=(known after apply)
+id=(known after apply)
+name="K3S terraform module - Provisionning SSH key"
}
# kubernetes_cluster_role_binding.boostrap will be created+resource"kubernetes_cluster_role_binding""boostrap" {
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
+role_ref {
+api_group="rbac.authorization.k8s.io"+kind="ClusterRole"+name="admin"
}
+subject {
+api_group=(known after apply)
+kind="ServiceAccount"+name="bootstrap"+namespace="default"
}
}
# kubernetes_service_account.bootstrap will be created+resource"kubernetes_service_account""bootstrap" {
+automount_service_account_token=true+default_secret_name=(known after apply)
+id=(known after apply)
+metadata {
+generation=(known after apply)
+name="bootstrap"+namespace="default"+resource_version=(known after apply)
+self_link=(known after apply)
+uid=(known after apply)
}
}
# module.k3s.null_resource.agents_drain["k3s-agent-0_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-1_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_drain["k3s-agent-2_node"] will be created+resource"null_resource""agents_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_install["k3s-agent-0_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-1_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_install["k3s-agent-2_node"] will be created+resource"null_resource""agents_install" {
+id=(known after apply)
+triggers={
+"on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"+"on_new_version" = "v1.21.2+k3s1"
}
}
# module.k3s.null_resource.agents_label["k3s-agent-0_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-1_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_label["k3s-agent-2_node|node.kubernetes.io/pool"] will be created+resource"null_resource""agents_label" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.agents_taint["k3s-agent-0_node|dedicated"] will be created+resource"null_resource""agents_taint" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[0] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[1] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[2] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[3] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[4] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.k8s_ca_certificates_install[5] will be created+resource"null_resource""k8s_ca_certificates_install" {
+id=(known after apply)
}
# module.k3s.null_resource.kubernetes_ready will be created+resource"null_resource""kubernetes_ready" {
+id=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-0"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-1"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
# module.k3s.null_resource.servers_drain["k3s-control-plane-2"] will be created+resource"null_resource""servers_drain" {
+id=(known after apply)
+triggers=(known after apply)
}
  # module.k3s.null_resource.servers_install["k3s-control-plane-0"] will be created
  + resource "null_resource" "servers_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"
          + "on_new_version"       = "v1.21.2+k3s1"
        }
    }

  # module.k3s.null_resource.servers_install["k3s-control-plane-1"] will be created
  + resource "null_resource" "servers_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"
          + "on_new_version"       = "v1.21.2+k3s1"
        }
    }

  # module.k3s.null_resource.servers_install["k3s-control-plane-2"] will be created
  + resource "null_resource" "servers_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"
          + "on_new_version"       = "v1.21.2+k3s1"
        }
    }
  # module.k3s.random_password.k3s_cluster_secret will be created
  + resource "random_password" "k3s_cluster_secret" {
      + id          = (known after apply)
      + length      = 48
      + lower       = true
      + min_lower   = 0
      + min_numeric = 0
      + min_special = 0
      + min_upper   = 0
      + number      = true
      + result      = (sensitive value)
      + special     = false
      + upper       = true
    }
  # module.k3s.tls_cert_request.master_user[0] will be created
  + resource "tls_cert_request" "master_user" {
      + cert_request_pem = (known after apply)
      + id               = (known after apply)
      + key_algorithm    = "ECDSA"
      + private_key_pem  = (sensitive value)

      + subject {
          + common_name  = "master-user"
          + organization = "system:masters"
        }
    }
  # module.k3s.tls_locally_signed_cert.master_user[0] will be created
  + resource "tls_locally_signed_cert" "master_user" {
      + allowed_uses          = [
          + "key_encipherment",
          + "digital_signature",
          + "client_auth",
        ]
      + ca_cert_pem           = (known after apply)
      + ca_key_algorithm      = "ECDSA"
      + ca_private_key_pem    = (sensitive value)
      + cert_pem              = (known after apply)
      + cert_request_pem      = (known after apply)
      + early_renewal_hours   = 0
      + id                    = (known after apply)
      + ready_for_renewal     = true
      + validity_end_time     = (known after apply)
      + validity_period_hours = 876600
      + validity_start_time   = (known after apply)
    }
  # module.k3s.tls_private_key.kubernetes_ca[0] will be created
  + resource "tls_private_key" "kubernetes_ca" {
      + algorithm                  = "ECDSA"
      + ecdsa_curve                = "P384"
      + id                         = (known after apply)
      + private_key_pem            = (sensitive value)
      + public_key_fingerprint_md5 = (known after apply)
      + public_key_openssh         = (known after apply)
      + public_key_pem             = (known after apply)
      + rsa_bits                   = 2048
    }

  # module.k3s.tls_private_key.kubernetes_ca[1] will be created
  + resource "tls_private_key" "kubernetes_ca" {
      + algorithm                  = "ECDSA"
      + ecdsa_curve                = "P384"
      + id                         = (known after apply)
      + private_key_pem            = (sensitive value)
      + public_key_fingerprint_md5 = (known after apply)
      + public_key_openssh         = (known after apply)
      + public_key_pem             = (known after apply)
      + rsa_bits                   = 2048
    }

  # module.k3s.tls_private_key.kubernetes_ca[2] will be created
  + resource "tls_private_key" "kubernetes_ca" {
      + algorithm                  = "ECDSA"
      + ecdsa_curve                = "P384"
      + id                         = (known after apply)
      + private_key_pem            = (sensitive value)
      + public_key_fingerprint_md5 = (known after apply)
      + public_key_openssh         = (known after apply)
      + public_key_pem             = (known after apply)
      + rsa_bits                   = 2048
    }

  # module.k3s.tls_private_key.master_user[0] will be created
  + resource "tls_private_key" "master_user" {
      + algorithm                  = "ECDSA"
      + ecdsa_curve                = "P384"
      + id                         = (known after apply)
      + private_key_pem            = (sensitive value)
      + public_key_fingerprint_md5 = (known after apply)
      + public_key_openssh         = (known after apply)
      + public_key_pem             = (known after apply)
      + rsa_bits                   = 2048
    }
  # module.k3s.tls_self_signed_cert.kubernetes_ca_certs["0"] will be created
  + resource "tls_self_signed_cert" "kubernetes_ca_certs" {
      + allowed_uses          = [
          + "critical",
          + "digitalSignature",
          + "keyEncipherment",
          + "keyCertSign",
        ]
      + cert_pem              = (known after apply)
      + early_renewal_hours   = 0
      + id                    = (known after apply)
      + is_ca_certificate     = true
      + key_algorithm         = "ECDSA"
      + private_key_pem       = (sensitive value)
      + ready_for_renewal     = true
      + validity_end_time     = (known after apply)
      + validity_period_hours = 876600
      + validity_start_time   = (known after apply)

      + subject {
          + common_name = "kubernetes-client-ca"
        }
    }

  # module.k3s.tls_self_signed_cert.kubernetes_ca_certs["1"] will be created
  + resource "tls_self_signed_cert" "kubernetes_ca_certs" {
      + allowed_uses          = [
          + "critical",
          + "digitalSignature",
          + "keyEncipherment",
          + "keyCertSign",
        ]
      + cert_pem              = (known after apply)
      + early_renewal_hours   = 0
      + id                    = (known after apply)
      + is_ca_certificate     = true
      + key_algorithm         = "ECDSA"
      + private_key_pem       = (sensitive value)
      + ready_for_renewal     = true
      + validity_end_time     = (known after apply)
      + validity_period_hours = 876600
      + validity_start_time   = (known after apply)

      + subject {
          + common_name = "kubernetes-server-ca"
        }
    }

  # module.k3s.tls_self_signed_cert.kubernetes_ca_certs["2"] will be created
  + resource "tls_self_signed_cert" "kubernetes_ca_certs" {
      + allowed_uses          = [
          + "critical",
          + "digitalSignature",
          + "keyEncipherment",
          + "keyCertSign",
        ]
      + cert_pem              = (known after apply)
      + early_renewal_hours   = 0
      + id                    = (known after apply)
      + is_ca_certificate     = true
      + key_algorithm         = "ECDSA"
      + private_key_pem       = (sensitive value)
      + ready_for_renewal     = true
      + validity_end_time     = (known after apply)
      + validity_period_hours = 876600
      + validity_start_time   = (known after apply)

      + subject {
          + common_name = "kubernetes-request-header-key-ca"
        }
    }
Plan: 50 to add, 0 to change, 0 to destroy.
Changes to Outputs:
  + bootstrap_sa = (sensitive value)
  + summary      = {
      + agents  = [
          + {
              + annotations = []
              + labels      = {
                  + node.kubernetes.io/pool = "gpu"
                }
              + name        = "k3s-agent-0"
              + taints      = {
                  + dedicated = "gpu:NoSchedule"
                }
            },
          + {
              + annotations = []
              + labels      = {
                  + node.kubernetes.io/pool = "general"
                }
              + name        = "k3s-agent-1"
              + taints      = {
                  + dedicated = null
                }
            },
          + {
              + annotations = []
              + labels      = {
                  + node.kubernetes.io/pool = "general"
                }
              + name        = "k3s-agent-2"
              + taints      = {
                  + dedicated = null
                }
            },
        ]
      + servers = [
          + {
              + annotations = {
                  + server_id = 0
                }
              + labels      = []
              + name        = "k3s-control-plane-0"
              + taints      = []
            },
          + {
              + annotations = {
                  + server_id = 1
                }
              + labels      = []
              + name        = "k3s-control-plane-1"
              + taints      = []
            },
          + {
              + annotations = {
                  + server_id = 2
                }
              + labels      = []
              + name        = "k3s-control-plane-2"
              + taints      = []
            },
        ]
      + version = "v1.21.2+k3s1"
    }
─────────────────────────────────────────────────────────────────────────────
Note: You didn't use the -out option to save this plan, so Terraform can't
guarantee to take exactly these actions if you run "terraform apply" now.
Add this suggestion to a batch that can be applied as a single commit.This suggestion is invalid because no changes were made to the code.Suggestions cannot be applied while the pull request is closed.Suggestions cannot be applied while viewing a subset of changes.Only one suggestion per line can be applied in a batch.Add this suggestion to a batch that can be applied as a single commit.Applying suggestions on deleted lines is not supported.You must change the existing code in this line in order to create a valid suggestion.Outdated suggestions cannot be applied.This suggestion has been applied or marked resolved.Suggestions cannot be applied from pending reviews.Suggestions cannot be applied on multi-line comments.Suggestions cannot be applied while the pull request is queued to merge.Suggestion cannot be applied right now. Please check back later.
No description provided.